Navjot Singh; Deepesh Data; Jemin George; and Suhas Diggavi.
SPARQ-SGD: Event-Triggered and Compressed Communication in Decentralized Optimization.
IEEE Transactions on Automatic Control, 68(2): 721–736. 2023.
@ARTICLE{9691792,
  author={Singh, Navjot and Data, Deepesh and George, Jemin and Diggavi, Suhas},
  journal={IEEE Transactions on Automatic Control},
  title={SPARQ-SGD: Event-Triggered and Compressed Communication in Decentralized Optimization},
  volume={68},
  number={2},
  pages={721--736},
  year={2023},
  url={https://doi.org/10.1109/TAC.2022.3145576},
  doi={10.1109/TAC.2022.3145576},
  abstract={In this paper, we propose and analyze SPARQ-SGD, a communication efficient algorithm for decentralized training of large-scale machine learning models over a graph with n nodes, where communication efficiency is achieved using compressed exchange of local model parameters among neighboring nodes, which is triggered only when an event (a locally computable condition) is satisfied. Specifically, in SPARQ-SGD, each node takes a fixed number of local gradient steps and then checks if the model parameters have significantly changed compared to its last update; only when the change is beyond a certain threshold (specified by a design criterion), it compresses its local model parameters using both quantization and sparsification and communicates them to its neighbors. We prove that SPARQ-SGD converges as O(1/nT) and O(1/sqrt(nT)) in the strongly-convex and non-convex settings, respectively, matching the convergence rates of plain decentralized SGD. This demonstrates that we get communication efficiency achieved by aggressive compression, local iterations, and event-triggered communication essentially for free.},
  ISSN={1558-2523},
  type={2},
  tags={journal,DML,CEDL},
  url_arxiv={https://arxiv.org/abs/1910.14280},
}
Abstract: In this paper, we propose and analyze SPARQ-SGD, a communication efficient algorithm for decentralized training of large-scale machine learning models over a graph with n nodes, where communication efficiency is achieved using compressed exchange of local model parameters among neighboring nodes, which is triggered only when an event (a locally computable condition) is satisfied. Specifically, in SPARQ-SGD, each node takes a fixed number of local gradient steps and then checks if the model parameters have significantly changed compared to its last update; only when the change is beyond a certain threshold (specified by a design criterion), it compresses its local model parameters using both quantization and sparsification and communicates them to its neighbors. We prove that SPARQ-SGD converges as O(1/nT) and O(1/sqrt(nT)) in the strongly-convex and non-convex settings, respectively, matching the convergence rates of plain decentralized SGD. This demonstrates that we get communication efficiency achieved by aggressive compression, local iterations, and event-triggered communication essentially for free.
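The abstract describes a three-part update at each node: local SGD steps, a locally computable event trigger, and sparsified-plus-quantized transmission. The following is a minimal numpy sketch of one such round, not the authors' implementation: the helper names (top_k_sparsify, quantize, sparq_sgd_round), the norm-based trigger, and the top-k and sign-scale compressors are illustrative assumptions consistent with the abstract, not the paper's exact operators or threshold schedule.

```python
# Sketch of one SPARQ-SGD communication round at a single node (illustrative only).
import numpy as np

def top_k_sparsify(v, k):
    """Keep the k largest-magnitude entries of v, zero out the rest."""
    out = np.zeros_like(v)
    idx = np.argsort(np.abs(v))[-k:]
    out[idx] = v[idx]
    return out

def quantize(v):
    """Simple sign-times-mean-magnitude quantizer (an assumed stand-in)."""
    scale = np.mean(np.abs(v))
    return scale * np.sign(v)

def sparq_sgd_round(x, last_sent, grad_fn, lr=0.1, local_steps=5, k=10, threshold=1e-2):
    """One round: local steps, event check, then (maybe) compressed communication.

    Returns updated parameters, the copy last communicated to neighbors,
    and the compressed message (None if the event did not trigger).
    """
    # 1) Fixed number of local stochastic gradient steps.
    for _ in range(local_steps):
        x = x - lr * grad_fn(x)

    # 2) Event trigger: communicate only if the model moved enough
    #    since the last transmission (a locally computable condition).
    change = x - last_sent
    if np.linalg.norm(change) <= threshold:
        return x, last_sent, None  # skip communication this round

    # 3) Compress the change with sparsification + quantization, then "send" it.
    message = quantize(top_k_sparsify(change, k))
    return x, last_sent + message, message

# Tiny usage example on a noisy quadratic objective.
rng = np.random.default_rng(0)
target = rng.normal(size=50)
grad = lambda x: (x - target) + 0.01 * rng.normal(size=x.shape)
x, sent = np.zeros(50), np.zeros(50)
for t in range(20):
    x, sent, msg = sparq_sgd_round(x, sent, grad)
```

In the full algorithm each node would also mix the received compressed messages from its neighbors into a consensus step over the graph; the sketch above only shows the single-node trigger-and-compress logic.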
Xuanyu Cao; Tamer Başar; Suhas Diggavi; Yonina C. Eldar; Khaled B. Letaief; H. Vincent Poor; and Junshan Zhang.
Guest Editorial Communication-Efficient Distributed Learning Over Networks.
IEEE Journal on Selected Areas in Communications, 41(4): 845–850. April 2023.
@ARTICLE{10075678,
  author={Cao, Xuanyu and Başar, Tamer and Diggavi, Suhas and Eldar, Yonina C. and Letaief, Khaled B. and Poor, H. Vincent and Zhang, Junshan},
  journal={IEEE Journal on Selected Areas in Communications},
  title={Guest Editorial Communication-Efficient Distributed Learning Over Networks},
  year={2023},
  volume={41},
  number={4},
  pages={845--850},
  abstract={Distributed machine learning is envisioned as the bedrock of future intelligent networks, where agents exchange information with each other to train models collaboratively without uploading data to a central processor. Despite its broad applicability, a downside of distributed learning is the need for iterative information exchange between agents, which may lead to high communication overhead unaffordable in many practical systems with limited communication resources. To resolve this communication bottleneck, we need to devise communication-efficient distributed learning algorithms and protocols that can reduce the communication cost and simultaneously achieve satisfactory learning/optimization performance. Accomplishing this goal necessitates synergistic techniques from a diverse set of fields, including optimization, machine learning, wireless communications, game theory, and network/graph theory. This Special Issue is dedicated to communication-efficient distributed learning from multiple perspectives, including fundamental theories, algorithm design and analysis, and practical considerations.},
  doi={10.1109/JSAC.2023.3241848},
  ISSN={1558-0008},
  month={April},
  type={2},
  tags={journal,CEDL,DML},
}
Abstract: Distributed machine learning is envisioned as the bedrock of future intelligent networks, where agents exchange information with each other to train models collaboratively without uploading data to a central processor. Despite its broad applicability, a downside of distributed learning is the need for iterative information exchange between agents, which may lead to high communication overhead unaffordable in many practical systems with limited communication resources. To resolve this communication bottleneck, we need to devise communication-efficient distributed learning algorithms and protocols that can reduce the communication cost and simultaneously achieve satisfactory learning/optimization performance. Accomplishing this goal necessitates synergistic techniques from a diverse set of fields, including optimization, machine learning, wireless communications, game theory, and network/graph theory. This Special Issue is dedicated to communication-efficient distributed learning from multiple perspectives, including fundamental theories, algorithm design and analysis, and practical considerations.
Xuanyu Cao; Tamer Basar; Suhas N. Diggavi; Yonina C. Eldar; Khaled B. Letaief; H. Vincent Poor; and Junshan Zhang.
Communication-Efficient Distributed Learning: An Overview.
IEEE Journal on Selected Areas in Communications, 41(4): 851–873. 2023.
@article{DBLP:journals/jsac/CaoBDELPZ23a,
  author={Xuanyu Cao and Tamer Basar and Suhas N. Diggavi and Yonina C. Eldar and Khaled B. Letaief and H. Vincent Poor and Junshan Zhang},
  title={Communication-Efficient Distributed Learning: An Overview},
  journal={{IEEE} J. Sel. Areas Commun.},
  volume={41},
  number={4},
  pages={851--873},
  year={2023},
  url={https://doi.org/10.1109/JSAC.2023.3242710},
  doi={10.1109/JSAC.2023.3242710},
  timestamp={Tue, 28 Mar 2023 19:50:24 +0200},
  biburl={https://dblp.org/rec/journals/jsac/CaoBDELPZ23a.bib},
  type={2},
  tags={journal,CEDL,DML}
}